Example 1
 def registerInitialState(self, gameState):
   "Initializes beliefs and inference modules"
   CaptureAgent.registerInitialState(self, gameState)
   self.inferenceModules = [ExactInference(RandomGhost(a), self) for a in self.getOpponents(gameState)]
   for inference in self.inferenceModules: inference.initialize(gameState)
   self.enemyBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
   self.firstMove = True
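
The beliefs stored in self.enemyBeliefs are typically util.Counter distributions over board positions (that is what getBeliefDistribution usually returns in tracking-style inference modules). A minimal sketch of how they might be consumed, assuming that representation; the helper name getMostLikelyEnemyPositions is illustrative and not part of the original agent:

 def getMostLikelyEnemyPositions(self):
   # Each belief is assumed to be a util.Counter over positions summing to 1;
   # Counter.argMax() returns the position holding the most probability mass.
   return [belief.argMax() for belief in self.enemyBeliefs]
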
Example 2
  def registerInitialState(self, gameState):
	"""
	This method handles the initial setup of the
	agent to populate useful fields (such as what team
	we're on).
	A distanceCalculator instance caches the maze distances
	between each pair of positions, so your agents can use:
	self.distancer.getDistance(p1, p2)
	IMPORTANT: This method may run for at most 15 seconds.
	"""
	# Blue index = 1,3,   red = 0,2
	start = time.time()
	CaptureAgent.registerInitialState(self, gameState)
	self.dieCount = 0
	if self.index < 2:	#one attacker and one defender at the beginning
		self.is_attacker = True
	else:
		self.is_attacker = False
	self.opponents = self.getOpponents(gameState)
	self.teammate = self.getTeammate()
	f = open('data/input_data','r')
	self.factors = [int(i) for i in f.read().replace('\n','').split(' ')]
	self.is_ghost = True
	self.carryingFood = 0 #food you are carrying

	if self.red:
	  self.middle_line = gameState.data.layout.width/2-1
	else:
	  self.middle_line = gameState.data.layout.width/2
	self.dangerousness = self.getDangerousness(gameState)
	if debug:
		print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)
Example 3
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.start = gameState.getAgentPosition(self.index)
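
The distancer described in the docstring is what CaptureAgent.getMazeDistance wraps, so cached maze distances are available right after registration. A small illustrative sketch of code that could follow the line above (the attribute name closestFoodDist is an assumption, not part of this agent):

    # Distance from the starting square to the nearest food we are attacking.
    foodList = self.getFood(gameState).asList()
    if foodList:
      self.closestFoodDist = min(self.getMazeDistance(self.start, f) for f in foodList)
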
Example 4
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)
    # Board dimensions come from the layout rather than hard-coded values.
    width = gameState.data.layout.width
    height = gameState.data.layout.height

    moveMap = {}

    # Map every open square to the squares reachable from it in one step.
    for x in xrange(width):
      for y in xrange(height):
        if not gameState.hasWall(x, y):
          moveMap[(x, y)] = self.getPotentialMoves(gameState, x, y)

    # Vertices of the maze graph: open squares whose degree is not 2
    # (junctions and dead ends). V and E are presumably module-level lists.
    for (x, y) in moveMap:
      length = len(moveMap[(x, y)])
      if length != 2:
        V.append((x, y))

    # Edges: corridors of degree-2 squares, traced until a vertex is reached.
    for (x, y) in moveMap:
      if (x, y) not in V:
        E.append(self.dfsToVertex(moveMap, (x, y), []))
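
getPotentialMoves and dfsToVertex are not shown in this example. One plausible shape for getPotentialMoves (an assumption for illustration, not the original helper) returns the adjacent non-wall squares, which is what makes the degree test above pick out junctions and dead ends:

  def getPotentialMoves(self, gameState, x, y):
    # Hypothetical helper: adjacent squares that are inside the board and not walls.
    moves = []
    for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
      nx, ny = x + dx, y + dy
      if 0 <= nx < gameState.data.layout.width and 0 <= ny < gameState.data.layout.height:
        if not gameState.hasWall(nx, ny):
          moves.append((nx, ny))
    return moves
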
Example 5
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    # self.evaluationFunction = util.lookup('evaluate', globals())
    self.evaluationFunction = gameState.getScore()
    self.myTeam = self.getTeam(gameState)
    self.opponents = self.getOpponents(gameState)
    print "Opponents for agent ", self.index, "are ", self.opponents
    self.agentIndices = sorted(self.myTeam + self.opponents)
    self.treeDepth = 2 # int('1') didn't break either
Example 6
    def registerInitialState(self, gameState):
        """
        This method handles the initial setup of the
        agent to populate useful fields (such as what team
        we're on).

        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)

        IMPORTANT: This method may run for at most 15 seconds.
        """

        '''
        Make sure you do not delete the following line. If you would like to
        use Manhattan distances instead of maze distances in order to save
        on initialization time, please take a look at
        CaptureAgent.registerInitialState in captureAgents.py.
        '''
        CaptureAgent.registerInitialState(self, gameState)
        #set up data repository
        if self.red:
            if not TeamData.RedData:
                TeamData.RedData=TeamData(gameState, self.getTeam(gameState), self.getOpponents(gameState), self)
            self.data=TeamData.RedData

        else:
            if not TeamData.BlueData:
                TeamData.BlueData=TeamData(gameState, self.getTeam(gameState), self.getOpponents(gameState), self)
            self.data=TeamData.BlueData

        self.legalPositions=self.data.legalPositions
        self.offensive = self.data.getOffensive()
Example 7
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs
    global validPositions

    self.distributions = []
    self.lastAction = None
    self.lastFood = None
    self.lastPositions = {}
    self.agentFoodEaten = {}
    self.positionCount = {}
    self.optimalBlocks = None
    self.lastBestBlock = None
    self.bestDefensePositions = None
    self.trappedFood = None
    self.lastMoves = []
    self.pacmanTime = {}

    for enemyIndex in self.getOpponents(gameState):
        self.agentFoodEaten[enemyIndex] = 0
        self.pacmanTime[enemyIndex] = 0

    if len(validPositions) == 0:
      # All positions are those that are not walls
      validPositions = gameState.getWalls().asList(False)

      # We know that each enemy must be at its initial position at registration
      for enemyIndex in self.getOpponents(gameState):
        self.establishLocation(enemyIndex, gameState.getInitialAgentPosition(enemyIndex))
Example 8
  def registerInitialState(self, gameState):
    self.start = gameState.getAgentPosition(self.index)
    CaptureAgent.registerInitialState(self, gameState)
    self.debugging = False
    self.stationaryTolerance = random.randint(6,16)
    self.depth = 6

    "G A M E  K E Y  L O C A T I O N S  D E T E R M I N A T I O N"
    if self.red:
        leftEdge = gameState.data.layout.width / 2
        rightEdge =  gameState.data.layout.width - 2
        self.safeColumn = leftEdge - 1
        self.opSafeColumn = leftEdge
    else:
        leftEdge = 1
        rightEdge = gameState.data.layout.width / 2
        self.safeColumn = rightEdge
        self.opSafeColumn = rightEdge - 1

    self.safeSpaces = []
    self.opSafeSpaces = []
    for h in xrange(1,gameState.data.layout.height-1):
        if not gameState.data.layout.isWall((self.safeColumn, h)):
               self.safeSpaces += [(self.safeColumn, h)]
        if not gameState.data.layout.isWall((self.opSafeColumn, h)):
               self.opSafeSpaces += [(self.opSafeColumn, h)]

    if self.debugging:
        print "Coloring my safe column white"
        self.debugDraw([(self.safeColumn, el) for el in xrange(0, gameState.data.layout.height)], [1,1,1], clear=False)

        print "Coloring my safe spaces", self.safeSpaces, "blue"
        self.debugDraw(self.safeSpaces, [0,0,1], clear=False)
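
A hedged sketch of how the safe spaces computed above could be used later in the game; the helper name closestSafeSpace is illustrative and not part of this agent:

  def closestSafeSpace(self, gameState):
    # Nearest cell of our boundary column by cached maze distance: a natural retreat target.
    myPos = gameState.getAgentPosition(self.index)
    return min(self.safeSpaces, key=lambda spot: self.getMazeDistance(myPos, spot))
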
Example 9
  def chooseAction(self, gameState):
    """
  Picks among actions randomly.
  """
    actions = gameState.getLegalActions(self.index)

    '''
    You should change this in your own agent.
    '''
    optimalAction = "Stop"
    danger = False
    threshold = 8
    # Only enemies whose exact position is currently observable can be checked.
    for enemy in self.enemyTeam:
      enemyPos = gameState.getAgentPosition(enemy)
      if enemyPos is not None and self.getMazeDistance(enemyPos, gameState.getAgentPosition(self.index)) < threshold:
        danger = True

    if danger:
      optimalAction = random.choice(actions)
    else:
      optimal = 999
      for action in actions:
        successor = self.getSuccessor(gameState, action)
        foodList = self.getFood(successor).asList()
        myPos = successor.getAgentState(self.index).getPosition()
        # Distance from the successor position to its closest food.
        closestDist = min([self.getMazeDistance(myPos, food) for food in foodList])
        if closestDist < optimal:
          optimal = closestDist
          optimalAction = action

    return optimalAction
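
getSuccessor is called above but not defined in the snippet. The contest's baseline agents use a helper along these lines (reproduced here as a sketch; treat the distributed baselineTeam.py as authoritative):

  def getSuccessor(self, gameState, action):
    # Generate the successor state; if the agent ends up on a half-grid point,
    # take the same step again so positions stay on grid squares.
    successor = gameState.generateSuccessor(self.index, action)
    pos = successor.getAgentState(self.index).getPosition()
    if pos != util.nearestPoint(pos):
      return successor.generateSuccessor(self.index, action)
    return successor
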
Example 10
 def __init__(self, index):
   CaptureAgent.__init__(self, index)
   self.firstTurnComplete = False
   self.startingFood = 0
   self.theirStartingFood = 0
   
   self.legalPositions = None
   self.estimate = util.Counter()
Example 11
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     #self.particleFilters = [ParticleFilter(gameState.getAgent(opponentIndex), opponentIndex) for opponentIndex in self.getOpponents(gameState)]
     self.mostLikelyPositions = {}
     self.particleFilters = {}
     for i in self.getOpponents(gameState):
         self.particleFilters[i] = ParticleFilter(gameState.getAgentState(i), i, self.index)
         self.particleFilters[i].initializeUniformly(gameState) #FIXME Should these all initially point toward the corner?
Example 12
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.initialize(gameState)
    self.initializeBeliefs(gameState)
    self.isRed = self.red

    #Get width and height of maze
    self.getMazeDimensions(gameState)

    #print self.legalPositions , "Legal"
    #print self.walls, "Wall"

    '''
    HoverZones - Regions where our ghosts will hangout
    Basically a vertical band near the transition area

    '''
    self.hoverZones = []
    self.initializeHoverZones(gameState)

    #print "hoverZones ", self.hoverZones

    quarterHeight = len(self.hoverZones) / 4
    threeFourthsHeight = 3 * len(self.hoverZones) / 4
    if self.index < 2:
      x, y = self.hoverZones[quarterHeight]
    else:
      x, y = self.hoverZones[threeFourthsHeight]

    self.target = (x, y)


    #How many moves should our agent attack?
    self.moves = 0
    #Has the pacman just been killed?
    self.pacmanKill = False
Example 13
 def registerInitialState(self,gameState):
   CaptureAgent.registerInitialState(self, gameState)
   self.inferenceMods = {i:ExactInference(i,self.index,gameState) for i in self.getOpponents(gameState)}
   if self.isTimerTracker:
     self.isTimerTracker=True
     RaptorAgent.isTimerTracker=False
   self.foodNum = 0
   if (not gameState.isOnRedTeam(self.index)):
       RaptorAgent.weights['score']=-abs(RaptorAgent.weights['score'])
Example 14
  def registerInitialState(self, gameState):
    """
    Initialize the beliefs of the enemies uniformly
    and many related operations
    """
    start = time.time()

    CaptureAgent.registerInitialState(self, gameState)

    global beliefs

    # legalPositions is useful but not provided by the framework, so construct it here
    width = gameState.data.layout.width
    height = gameState.data.layout.height
    self.legalPositions = [(x, y) for x in range(width) for y in range(height) if not gameState.hasWall(x, y)]

    # set legalPositions for defending and offending respectively
    self.defensiveLegalPositions = [p for p in self.legalPositions if self.isOurTerrain(gameState, p)]
    self.offensiveLegalPositions = [p for p in self.legalPositions if not (p in self.defensiveLegalPositions)]

    # initialize beliefs according to legalPositions
    for enemyId in self.getOpponents(gameState):
      self.initializeBeliefs(gameState, enemyId, initial = True)

    # set availableActions for each grid
    self.setNeighbors(gameState)

    # set distances on each side
    # using global buffer to save time
    global buff
    if buff is None:
      buff = util.Counter()
      buff['dd'] = self.getDistances(gameState, self.defensiveLegalPositions)
      #buff['dsd'] = self.getSecondDistances(gameState, buff['dd'], self.defensiveLegalPositions)
      buff['od'] = self.getDistances(gameState, self.offensiveLegalPositions)
    self.defensiveDistance = buff['dd']
    #self.defensiveSecondDistance = buff['dsd']
    self.offensiveDistance = buff['od']

    # set boundaries - entries of the enemy!
    self.setBoundary(gameState)

    # set transition model
    self.transition = util.Counter()
    self.transition['favor'] = 0.8
    self.transition['indifferent'] = 0.05

    self.helpDefending = False # initially, it's not defending
    self.alert = 3
    # these are for offensive agents.
    if self.__class__.__name__ == "OffensiveReflexAgent":
      self.role = "offensive"
      self.disToDefender = None
    else:
      self.role = "defensive"
      self.crazy = False # it would be crazy if scared
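
initializeBeliefs is called above but not shown. A minimal sketch, assuming beliefs is the module-level dictionary declared with global and that each entry is a util.Counter over positions:

  def initializeBeliefs(self, gameState, enemyId, initial=False):
    # Sketch only: on the first call the enemy is known to be at its spawn point;
    # afterwards the belief is spread uniformly over every legal position.
    beliefs[enemyId] = util.Counter()
    if initial:
      beliefs[enemyId][gameState.getInitialAgentPosition(enemyId)] = 1.0
    else:
      for p in self.legalPositions:
        beliefs[enemyId][p] = 1.0
      beliefs[enemyId].normalize()
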
Example 15
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)
        self.ready = False
        myTeam = self.getTeam(gameState)
        self.team = {}
        self.team[myTeam[0]] = 1
        self.team[myTeam[1]] = 2

        startx = gameState.getWalls().width / 2
        starty = gameState.getWalls().height / 2
        startPos = []
        if self.getTeam(gameState)[0] % 2 != 0:
            startx -= 1
        self.temStartPoint = (startx, starty)
        minDist = 99999
        myPos = gameState.getAgentState(self.index).getPosition()
        while starty >= 0:
            if gameState.hasWall(startx, starty) == False:
                dist = self.getMazeDistance(myPos, (startx, starty))
                if dist < minDist:
                    self.Bstart = (startx, starty)
                    minDist = dist
            starty -= 1

        startx, starty = self.temStartPoint
        minDist = 99999
        for i in xrange(gameState.getWalls().height - starty):
            if gameState.hasWall(startx, starty) == False:
                dist = self.getMazeDistance(myPos, (startx, starty))
                if dist < minDist:
                    self.Astart = (startx, starty)
                    minDist = dist
            starty += 1

        self.start = (16, 15)
        self.status = None
        basePoint = gameState.getAgentState(myTeam[1]).getPosition()
        x = basePoint[0]
        y = basePoint[1]
        self.opponentStatus = {}
        if self.getTeam(gameState)[0] % 2 != 0:  ## set the origin point; the team is blue
            self.teamName = "blue"
            if self.index == 1:
                self.basePoint = [(x, y - 1), (float(x), float(y - 1))]
            elif self.index == 3:
                self.basePoint = [(x, y), (float(x), float(y))]
            self.opponentStatus[0] = False
            self.opponentStatus[2] = False
        else:
            self.teamName = "red"
            if self.index == 0:
                self.basePoint = [(x, y + 1), (float(x), float(y + 1))]
            elif self.index == 2:
                self.basePoint = [(x, y), (float(x), float(y))]
            self.opponentStatus[1] = False
            self.opponentStatus[3] = False
Example 16
 def __init__( self, index, timeForComputing = .1 ):
     CaptureAgent.__init__( self, index, timeForComputing)
     print self.red, index, timeForComputing
     self.visibleAgents = []
     self.foodLeft = 0
     self.foodEaten = 0
     self.isPacman = False
     self.a = []
     self.first = True
     self.size = 0
Example 17
 def __init__(self, index):
   CaptureAgent.__init__(self, index)
   
   self.legalPositions = []
   
   "tuple with (boolean, belief distribution, actual/most probable position)"
   "TODO: Assumes 6 agents, will not always work"
   self.positions = [None, None, None, None, None, None] 
   
   self.firstMove = True
Example 18
 def __init__(self, index, timeForComputing=.1):
     CaptureAgent.__init__(self, index, timeForComputing)
     team.append(self)
     
     if self.index % 2 == 0:
         self.isRed = True
         self.middle = (ReflexCaptureAgent.MAP_WIDTH / 4, ReflexCaptureAgent.MAP_HEIGHT / 2)
     else:
         self.isRed = False
          self.middle = (ReflexCaptureAgent.MAP_WIDTH * 3 / 4, ReflexCaptureAgent.MAP_HEIGHT / 2)
Example 19
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    # Get all non-wall positions on the board
    self.legalPositions = gameState.data.layout.walls.asList(False)

    # Initialize position belief distributions for opponents
    self.positionBeliefs = {}
    for opponent in self.getOpponents(gameState):
      self.initializeBeliefs(opponent)
Example 20
  def __init__(self, index, timeForComputing = .1):
    CaptureAgent.__init__(self, index, timeForComputing)

    self.depth = 4
    self.numParticles = 10
    self.steps = [(0, 0), (0, 1), (1, 0), (-1, 0), (0, -1)]

    self.teammateLocations = {}
    self.enemyParticles = {}
    self.lastAction = None
Example 21
  def registerInitialState(self, gameState):

    CaptureAgent.registerInitialState(self, gameState)
    self.boundary_top = True
    # Infer the team from the spawn column: this agent assumes red spawns at x == 1.
    self.isRed = gameState.getAgentState(self.index).getPosition()[0] == 1

    self.boundaries = self.boundaryTravel(gameState)
    self.treeDepth = 3
Example 22
 def __init__(self, index):
     CaptureAgent.__init__(self, index)
     self.firstTurnComplete = False
     self.startingFood = 0
     self.theirStartingFood = 0
     self.discount = .9
     self.alpha = 0.002
     self.featureHandler = FeatureHandler()
     self.agentType = 'basicQLearningAgent'
     self.weights = None
     self.explorationRate = 0.3
Example 23
 def __init__(self, index, alpha, epsilon):
   CaptureAgent.__init__(self, index)
   # self.weights = util.Counter()
   self.alpha = alpha #learning rate--higher means learn in larger steps
   self.epsilon = epsilon #exploration rate--higher means explore more
   self.firstTurnComplete = False
   self.startingFood = 0
   self.theirStartingFood = 0
   
   #used for estimating the enemy pos
   self.legalPositions = None
   self.estimate = util.Counter()
Example 24
 def registerInitialState(self, gameState):
     global defendFoodList
     global g_intorState
     CaptureAgent.registerInitialState(self, gameState)
     self.start = gameState.getAgentPosition(self.index)
     self.oppIndces = self.getOpponents(gameState)
     self.teamIndces = self.getTeam(gameState)
     self.walls = gameState.getWalls()
     self.deadEnd = self.buildDeadEnd(gameState)
     self.pointToWin = 100
     self.wallMemory = gameState.getWalls().deepCopy()
     self.blockers = []
     defendFoodList = self.getFoodYouAreDefending(gameState).asList()
Example 25
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)
    
    # Initialize Particles
    for index in xrange(gameState.getNumAgents()):
      if index == self.index:
        continue

      if self.isTeammate(gameState, index):
        self.teammateLocations[index] = gameState.getInitialAgentPosition(index)
      else:
        self.enemyParticles[index] = util.Counter()
        self.enemyParticles[index][gameState.getInitialAgentPosition(index)] = self.numParticles
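
Examples 20 and 25 look like two halves of the same agent: particles for each enemy live in a util.Counter from position to particle count, and self.steps lists the possible one-step moves. Under those assumptions, an elapse-time update could look roughly like the sketch below (elapseTimeForEnemy is an illustrative name; random and util are assumed to be imported at module level):

  def elapseTimeForEnemy(self, gameState, enemyIndex):
    # Sketch: move every particle to a uniformly chosen legal neighbouring square.
    newParticles = util.Counter()
    for (x, y), count in self.enemyParticles[enemyIndex].items():
      neighbors = [(x + dx, y + dy) for dx, dy in self.steps
                   if not gameState.hasWall(x + dx, y + dy)]
      for _ in range(int(count)):
        newParticles[random.choice(neighbors)] += 1
    self.enemyParticles[enemyIndex] = newParticles
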
Example 26
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs
    global validPositions

    self.lastFood = None

    if len(validPositions) == 0:
      # All positions are those that are not walls
      validPositions = gameState.getWalls().asList(False)

      # We know that each enemy must be at its initial position at registration
      for enemyIndex in self.getOpponents(gameState):
        self.establishLocation(enemyIndex, gameState.getInitialAgentPosition(enemyIndex))
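
establishLocation (also used in Example 7) is not shown. A minimal sketch, assuming the global beliefs store maps enemy indices to util.Counter distributions over positions:

  def establishLocation(self, enemyIndex, position):
    # Sketch: collapse the belief for this enemy onto a single known square.
    beliefs[enemyIndex] = util.Counter()
    beliefs[enemyIndex][position] = 1.0
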
Example 27
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.start = gameState.getAgentPosition(self.index)
    self.walls = gameState.getWalls()
    top = 0
    bot = self.walls.height
    for food in self.getFood(gameState).asList():
      if food[1] < bot:
        bot = food[1]
      if food[1] > top:
        top = food[1]
    food1,food2 = [], []
    # for food in self.getFood(gameState).asList():
    #   if food[1] > bot+(top-bot)/2:
    #     food1.append(food)
    #   else:
    #     food2.append(food)
    # Scan the board bottom-to-top: roughly half of the food goes to food2,
    # the rest to food1, so the two agents can split their targets.
    foodList = self.getFood(gameState).asList()
    for y in range(self.walls.height):
      for x in range(self.walls.width):
        if (x, y) in foodList and len(food2) <= len(foodList) / 2:
          food2.append((x, y))
    for food in foodList:
      if food not in food2:
        food1.append(food)
    self.foodOne, self.foodTwo = food1, food2
Example 28
    def registerInitialState(self, gameState):
      """
      This method handles the initial setup of the
      agent to populate useful fields (such as what team
      we're on).

      A distanceCalculator instance caches the maze distances
      between each pair of positions, so your agents can use:
      self.distancer.getDistance(p1, p2)

      IMPORTANT: This method may run for at most 15 seconds.
      """
      CaptureAgent.registerInitialState(self, gameState)

      self.start = gameState.getAgentPosition(self.index)
      self.startPos=gameState.getAgentState(self.index).getPosition()
Example 29
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on). 
    
    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    ''' 
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py. 
    '''
    CaptureAgent.registerInitialState(self, gameState)

    # Store team and enemy indices
    self.teamIndices = self.getTeam(gameState)
    self.enemyIndices = self.getOpponents(gameState)

    # Check how recently we were near the enemy to check if we've knocked him out
    self.nearEnemyCounter = 0

    # Set up particle filters to track enemy locations
    self.enemyLocFilters = {}
    for i in self.enemyIndices:
      self.enemyLocFilters[i] = (ParticleFilter(gameState, i,
                              gameState.getInitialAgentPosition(i)))


    # Dict of qValues with (state, action) tuples as keys
    self.qValues = util.Counter()

    self.modes = ['Offense', 'Defense']
    self.currMode = 'Offense'
    self.discount = 0.5
    self.learningRate = 0.5

    self.initializeWeights()

    # Module-level debug counter of how many agents have registered.
    global test
    test += 1
    print(test)
Example 30
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    """
    Initialize all variables
    """
    height = gameState.data.layout.height
    width = gameState.data.layout.width
    walls = gameState.data.layout.walls
    curr_state = gameState.getAgentState(self.index)
    curr_pos = gameState.getAgentState(self.index).getPosition()
    us = self.getTeam(gameState)
    them = self.getOpponents(gameState)
    our_food = self.getFoodYouAreDefending(gameState)
    their_food = self.getFood(gameState)
    score = self.getScore(gameState)
    capsules = self.getCapsules(gameState)


    # Mark every column on our half of the board as "our side".
    food_grid = self.getFoodYouAreDefending(gameState)
    halfway = food_grid.width / 2
    if self.red:
      our_columns = range(halfway)
    else:
      our_columns = range(halfway, food_grid.width)
    for y in range(food_grid.height):
      for x in our_columns:
        food_grid[x][y] = True
    self.our_side = food_grid


    """
    HMM for reading where opponent is
    """
    self.hmm_list = dict([(index, util.Counter()) for 
      index in self.getOpponents(gameState)])
    if self.red:
      self.agentsOnTeam = gameState.getRedTeamIndices()
    else:
      self.agentsOnTeam = gameState.getBlueTeamIndices()
    self.legalPositions = [p for p in gameState.getWalls().asList(False)]
    for dist in self.hmm_list.values():
      # initializes randomly over all positions
      for p in self.legalPositions:
        dist[p] = 1
      dist.normalize()
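
Given the uniform distributions built above, an observation step for this HMM could re-weight each position by the likelihood of the noisy sonar reading. The sketch below assumes the contest API (getAgentDistances for the noisy readings, getDistanceProb for their likelihood, Manhattan-based sonar) and uses an illustrative helper name:

  def observeOpponent(self, gameState, oppIndex):
    # Sketch: re-weight one opponent's belief by the likelihood of the sonar reading.
    # Assumes gameState.getAgentDistances() and gameState.getDistanceProb(true, noisy)
    # from the contest framework, with Manhattan-based noisy distances.
    myPos = gameState.getAgentPosition(self.index)
    noisy = gameState.getAgentDistances()[oppIndex]
    belief = self.hmm_list[oppIndex]
    for p in self.legalPositions:
      belief[p] *= gameState.getDistanceProb(util.manhattanDistance(p, myPos), noisy)
    belief.normalize()
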
Example 31
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     self.start = gameState.getAgentPosition(self.index)
     self.registerTeam(self.getTeam(gameState))
     self.buddyIndex = [i for i in self.agentsOnTeam if i != self.index][0]
Example 32
 def registerInitialState(self, gameState):
   CaptureAgent.registerInitialState(self, gameState)
   self.distancer.getMazeDistances()
Example 33
 def __init__(self, index):
   CaptureAgent.__init__(self, index)
Example 34
 def registerInitialState(self, gameState):
   CaptureAgent.registerInitialState(self, gameState)
   self.distancer.getMazeDistances()
   print dir(gameState)
   print gameState.getWalls()
Example 35
 def registerInitialState(self, gameState):
     self.start = gameState.getAgentPosition(self.index)
     self.isOffensive = True  #By default, assign offensive method
     self.indices = self.getTeam(gameState)
     self.walls = gameState.getWalls()
     CaptureAgent.registerInitialState(self, gameState)
Example 36
  def __init__( self, index, timeForComputing = .2 ):
    CaptureAgent.__init__(self, index, timeForComputing)

    self.to_red_dists = {}
    self.to_blue_dists = {}
    self.opp_dic = {}
Example 37
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     self.player = PlayerState(self, gameState)
     self.stage = StageSM()
     PlayerState.init_beliefs(self.player, gameState)
Example 38
 def __init__(self, index):
   CaptureAgent.__init__(self, index)
   self.target = None
Example 39
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)

        self.start = gameState.getAgentPosition(self.index)
Example 40
    def registerInitialState(self, gameState):
        """
        This method handles the initial setup of the
        agent to populate useful fields (such as what team
        we're on).

        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)

        IMPORTANT: This method may run for at most 15 seconds.
        """
        '''
        Make sure you do not delete the following line. If you would like to
        use Manhattan distances instead of maze distances in order to save
        on initialization time, please take a look at
        CaptureAgent.registerInitialState in captureAgents.py.
        '''
        CaptureAgent.registerInitialState(self, gameState)
        self.registerIndices(gameState)
        '''
        Your initialization code goes here, if you need any.
        '''
        # utility variables declaration
        self.utilityVariable(gameState)

        # constant value used for finding defensive coordinates
        const = self.constants()

        # on a specific X, collect every Y with no wall, for the agent to patrol
        for i in range(self.heightGameSpace - 1):
            if not self.walls[self.centreX + const][i]:
                self.entryPoints.append((self.centreX + const, i))

        maxIndiceRed = 0
        maxIndiceBlue = 0
        for i in range(len(self.redIndices)):
            if self.redIndices[i] > maxIndiceRed:
                maxIndiceRed = self.redIndices[i]
            if self.blueIndices[i] > maxIndiceBlue:
                maxIndiceBlue = self.blueIndices[i]

        # making agent biased, 1 agent moves in top location, other in bottom location
        if self.index == maxIndiceRed:
            x, y = self.entryPoints[3 * len(self.entryPoints) / 4]
        elif self.index == maxIndiceBlue:
            x, y = self.entryPoints[3 * len(self.entryPoints) / 4]
        else:
            x, y = self.entryPoints[1 * len(self.entryPoints) / 4]
        self.target = (x, y)

        #enemy location based on prior belief
        global prior
        # One independent Counter per agent (a multiplied list would share a single Counter).
        prior = [util.Counter() for _ in range(self.agentCount)]

        # initial state: each opponent starts at its spawn position
        for opp in self.getOpponents(gameState):
            prior[opp][gameState.getInitialAgentPosition(opp)] = 1.0
        self.takePosition(gameState)
Example 41
 def registerInitialState(self, gameState):
     self.food_eaten = 0
     self.start = gameState.getAgentPosition(self.index)
     CaptureAgent.registerInitialState(self, gameState)