Example no. 1
 def registerInitialState(self, gameState):
   "Initializes beliefs and inference modules"
   CaptureAgent.registerInitialState(self, gameState)
   self.inferenceModules = [ExactInference(RandomGhost(a), self) for a in self.getOpponents(gameState)]
   for inference in self.inferenceModules: inference.initialize(gameState)
   self.enemyBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
   self.firstMove = True
Example no. 2
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.start = gameState.getAgentPosition(self.index)
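
The docstring above is where the template explains self.distancer.getDistance(p1, p2). As a quick illustration of how the cached maze distances are typically used (the helper below is illustrative, not part of the template):

  def closestFood(self, gameState):
    # Illustrative helper: the food pellet nearest to this agent,
    # measured with the precomputed maze distances.
    myPos = gameState.getAgentPosition(self.index)
    foodList = self.getFood(gameState).asList()
    if not foodList:
      return None
    return min(foodList, key=lambda food: self.distancer.getDistance(myPos, food))
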
Example no. 3
  def registerInitialState(self, gameState):
	"""
	This method handles the initial setup of the
	agent to populate useful fields (such as what team
	we're on).
	A distanceCalculator instance caches the maze distances
	between each pair of positions, so your agents can use:
	self.distancer.getDistance(p1, p2)
	IMPORTANT: This method may run for at most 15 seconds.
	"""
	# Blue index = 1,3,   red = 0,2
	start = time.time()
	CaptureAgent.registerInitialState(self, gameState)
	self.dieCount = 0
	if self.index < 2:	#one attacker and one defender at the beginning
		self.is_attacker = True
	else:
		self.is_attacker = False
	self.opponents = self.getOpponents(gameState)
	self.teammate = self.getTeammate()
	with open('data/input_data', 'r') as f:
		self.factors = [int(i) for i in f.read().replace('\n', '').split(' ')]
	self.is_ghost = True
	self.carryingFood = 0 #food you are carrying

	if self.red:
	  self.middle_line = gameState.data.layout.width/2-1
	else:
	  self.middle_line = gameState.data.layout.width/2
	self.dangerousness = self.getDangerousness(gameState)
	if debug:
		print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)
Example no. 4
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)
    gameStr = str(gameState)
    # rows = gameStr.count("\n") + 1
    # cols = (len(gameStr) - (rows - 1)) / rows
    rows = 30
    cols = 14

    V = []   # junction / dead-end cells (graph vertices)
    E = []   # corridor paths between vertices (graph edges)
    moveMap = {}

    print rows, cols
    print gameStr
    for x in xrange(rows):
      for y in xrange(cols):
        print x, y
        if not gameState.hasWall(x, y):
          moveMap[(x, y)] = self.getPotentialMoves(gameState, x, y)

    for (x, y) in moveMap:
      length = len(moveMap[(x, y)])
      if length != 2:
        V.append((x, y))

    for (x, y) in moveMap:
      if (x, y) not in V:
        E.append(self.dfsToVertex(moveMap, (x, y), []))
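
Example no. 4 compresses the maze into a graph: every non-wall cell whose number of legal moves differs from two becomes a vertex (a junction or a dead end), and the corridors between vertices become edges found by dfsToVertex. A self-contained sketch of the vertex-detection step, written against a plain set of wall coordinates rather than the game state (all names here are illustrative):

def legalMoves(walls, width, height, cell):
    # Non-wall orthogonal neighbours of a cell.
    x, y = cell
    moves = []
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx < width and 0 <= ny < height and (nx, ny) not in walls:
            moves.append((nx, ny))
    return moves

def findVertices(walls, width, height):
    # Corridor cells have exactly two neighbours; anything else
    # (junction, dead end, isolated cell) is a graph vertex.
    vertices = []
    for x in range(width):
        for y in range(height):
            cell = (x, y)
            if cell not in walls and len(legalMoves(walls, width, height, cell)) != 2:
                vertices.append(cell)
    return vertices
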
Example no. 5
  def registerInitialState(self, gameState):
    self.start = gameState.getAgentPosition(self.index)
    CaptureAgent.registerInitialState(self, gameState)
    self.debugging = False
    self.stationaryTolerance = random.randint(6,16)
    self.depth = 6

    "G A M E  K E Y  L O C A T I O N S  D E T E R M I N A T I O N"
    if self.red:
        leftEdge = gameState.data.layout.width / 2
        rightEdge =  gameState.data.layout.width - 2
        self.safeColumn = leftEdge - 1
        self.opSafeColumn = leftEdge
    else:
        leftEdge = 1
        rightEdge = gameState.data.layout.width / 2
        self.safeColumn = rightEdge
        self.opSafeColumn = rightEdge - 1

    self.safeSpaces = []
    self.opSafeSpaces = []
    for h in xrange(1,gameState.data.layout.height-1):
        if not gameState.data.layout.isWall((self.safeColumn, h)):
               self.safeSpaces += [(self.safeColumn, h)]
        if not gameState.data.layout.isWall((self.opSafeColumn, h)):
               self.opSafeSpaces += [(self.opSafeColumn, h)]

    if self.debugging:
        print "Coloring my safe column white"
        self.debugDraw([(self.safeColumn, el) for el in xrange(0, gameState.data.layout.height)], [1,1,1], clear=False)

        print "Coloring my safe spaces", self.safeSpaces, "blue"
        self.debugDraw(self.safeSpaces, [0,0,1], clear=False)
Example no. 6
    def registerInitialState(self, gameState):
        """
        This method handles the initial setup of the
        agent to populate useful fields (such as what team
        we're on).

        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)

        IMPORTANT: This method may run for at most 15 seconds.
        """

        '''
        Make sure you do not delete the following line. If you would like to
        use Manhattan distances instead of maze distances in order to save
        on initialization time, please take a look at
        CaptureAgent.registerInitialState in captureAgents.py.
        '''
        CaptureAgent.registerInitialState(self, gameState)
        #set up data repository
        if self.red:
            if not TeamData.RedData:
                TeamData.RedData=TeamData(gameState, self.getTeam(gameState), self.getOpponents(gameState), self)
            self.data=TeamData.RedData

        else:
            if not TeamData.BlueData:
                TeamData.BlueData=TeamData(gameState, self.getTeam(gameState), self.getOpponents(gameState), self)
            self.data=TeamData.BlueData

        self.legalPositions=self.data.legalPositions
        self.offensive = self.data.getOffensive()
Example no. 7
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    # self.evaluationFunction = util.lookup('evaluate', globals())
    self.evaluationFunction = gameState.getScore()
    self.myTeam = self.getTeam(gameState)
    self.opponents = self.getOpponents(gameState)
    print "Opponents for agent ", self.index, "are ", self.opponents
    self.agentIndices = sorted(self.myTeam + self.opponents)
    self.treeDepth = 2 # int('1') didn't break either
Example no. 8
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs
    global validPositions

    self.distributions = []
    self.lastAction = None
    self.lastFood = None
    self.lastPositions = {}
    self.agentFoodEaten = {}
    self.positionCount = {}
    self.optimalBlocks = None
    self.lastBestBlock = None
    self.bestDefensePositions = None
    self.trappedFood = None
    self.lastMoves = []
    self.pacmanTime = {}

    for enemyIndex in self.getOpponents(gameState):
        self.agentFoodEaten[enemyIndex] = 0
        self.pacmanTime[enemyIndex] = 0

    if len(validPositions) == 0:
      # All positions are those that are not walls
      validPositions = gameState.getWalls().asList(False)

      # We know that each enemy must be at its initial position at registration
      for enemyIndex in self.getOpponents(gameState):
        self.establishLocation(enemyIndex, gameState.getInitialAgentPosition(enemyIndex))
Example no. 9
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     #self.particleFilters = [ParticleFilter(gameState.getAgent(opponentIndex), opponentIndex) for opponentIndex in self.getOpponents(gameState)]
     self.mostLikelyPositions = {}
     self.particleFilters = {}
     for i in self.getOpponents(gameState):
         self.particleFilters[i] = ParticleFilter(gameState.getAgentState(i), i, self.index)
         self.particleFilters[i].initializeUniformly(gameState) #FIXME Should these all initially point toward the corner?
Example no. 10
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.initialize(gameState)
    self.initializeBeliefs(gameState)
    self.isRed = self.red

    #Get width and height of maze
    self.getMazeDimensions(gameState)

    #print self.legalPositions , "Legal"
    #print self.walls, "Wall"

    '''
    HoverZones - Regions where our ghosts will hangout
    Basically a vertical band near the transition area

    '''
    self.hoverZones = []
    self.initializeHoverZones(gameState)

    #print "hoverZones ", self.hoverZones

    quarterHeight = len(self.hoverZones) / 4
    threeFourthsHeight = 3 * len(self.hoverZones) / 4
    if self.index < 2:
      x, y = self.hoverZones[quarterHeight]
    else:
      x, y = self.hoverZones[threeFourthsHeight]

    self.target = (x, y)


    #How many moves should our agent attack?
    self.moves = 0
    #Has the pacman just been killed?
    self.pacmanKill = False
Example no. 11
 def registerInitialState(self,gameState):
   CaptureAgent.registerInitialState(self, gameState) #Pointless comment
   self.inferenceMods = {i:ExactInference(i,self.index,gameState) for i in self.getOpponents(gameState)}
   if self.isTimerTracker:
     self.isTimerTracker=True
     RaptorAgent.isTimerTracker=False
   self.foodNum = 0
   if (not gameState.isOnRedTeam(self.index)):
       RaptorAgent.weights['score']=-abs(RaptorAgent.weights['score'])
Example no. 12
  def registerInitialState(self, gameState):
    """
    Initialize the beliefs of the enemies uniformly
    and many related operations
    """
    start = time.time()

    CaptureAgent.registerInitialState(self, gameState)

    global beliefs

    # legalPositions is a useful variable. it's not provided! let's construct it
    width = gameState.data.layout.width
    height = gameState.data.layout.height
    self.legalPositions = [(x, y) for x in range(width) for y in range(height) if not gameState.hasWall(x, y)]

    # set legalPositions for defending and offending respectively
    self.defensiveLegalPositions = [p for p in self.legalPositions if self.isOurTerrain(gameState, p)]
    self.offensiveLegalPositions = [p for p in self.legalPositions if not (p in self.defensiveLegalPositions)]

    # initialize beliefs according to legalPositions
    for enemyId in self.getOpponents(gameState):
      self.initializeBeliefs(gameState, enemyId, initial = True)

    # set availableActions for each grid
    self.setNeighbors(gameState)

    # set distances on each side
    # using global buffer to save time
    global buff
    if buff == None:
      buff = util.Counter()
      buff['dd'] = self.getDistances(gameState, self.defensiveLegalPositions)
      #buff['dsd'] = self.getSecondDistances(gameState, buff['dd'], self.defensiveLegalPositions)
      buff['od'] = self.getDistances(gameState, self.offensiveLegalPositions)
    self.defensiveDistance = buff['dd']
    #self.defensiveSecondDistance = buff['dsd']
    self.offensiveDistance = buff['od']

    # set boundaries - entries of the enemy!
    self.setBoundary(gameState)

    # set transition model
    self.transition = util.Counter()
    self.transition['favor'] = 0.8
    self.transition['indifferent'] = 0.05

    self.helpDefending = False # initially, it's not defending
    self.alert = 3
    # these are for offensive agents.
    if self.__class__.__name__ == "OffensiveReflexAgent":
      self.role = "offensive"
      self.disToDefender = None
    else:
      self.role = "defensive"
      self.crazy = False # it would be crazy if scared
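
Example no. 12 tracks a belief distribution per enemy together with a transition model that gives 0.8 of the probability mass to a favoured move and 0.05 to the others. A minimal sketch of the corresponding time-elapse step over plain dictionaries; the favored predicate stands in for whatever rule the original agent uses and is purely illustrative:

def elapseTime(belief, neighbors, favored):
    # belief:    dict position -> probability
    # neighbors: dict position -> list of reachable positions
    # favored:   function(oldPos, newPos) -> True if the move is the favoured one
    newBelief = {}
    for oldPos, prob in belief.items():
        options = neighbors[oldPos]
        if not options:
            newBelief[oldPos] = newBelief.get(oldPos, 0.0) + prob
            continue
        weights = [0.8 if favored(oldPos, p) else 0.05 for p in options]
        total = sum(weights)
        for p, w in zip(options, weights):
            newBelief[p] = newBelief.get(p, 0.0) + prob * w / total
    return newBelief
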
Example no. 13
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)
        self.ready = False
        myTeam = self.getTeam(gameState)
        self.team = {}
        self.team[myTeam[0]] = 1
        self.team[myTeam[1]] = 2

        startx = gameState.getWalls().width / 2
        starty = gameState.getWalls().height / 2
        startPos = []
        if self.getTeam(gameState)[0] % 2 != 0:
            startx -= 1
        self.temStartPoint = (startx, starty)
        minDist = 99999
        myPos = gameState.getAgentState(self.index).getPosition()
        while starty >= 0:
            if gameState.hasWall(startx, starty) == False:
                dist = self.getMazeDistance(myPos, (startx, starty))
                if dist < minDist:
                    self.Bstart = (startx, starty)
                    minDist = dist
            starty -= 1

        startx, starty = self.temStartPoint
        minDist = 99999
        for i in xrange(gameState.getWalls().height - starty):
            if gameState.hasWall(startx, starty) == False:
                dist = self.getMazeDistance(myPos, (startx, starty))
                if dist < minDist:
                    self.Astart = (startx, starty)
                    minDist = dist
            starty += 1

        self.start = (16, 15)
        self.status = None
        basePoint = gameState.getAgentState(myTeam[1]).getPosition()
        x = basePoint[0]
        y = basePoint[1]
        self.opponentStatus = {}
        if self.getTeam(gameState)[0] % 2 != 0:  ## set the origin point; this team is blue
            self.teamName = "blue"
            if self.index == 1:
                self.basePoint = [(x, y - 1), (float(x), float(y - 1))]
            elif self.index == 3:
                self.basePoint = [(x, y), (float(x), float(y))]
            self.opponentStatus[0] = False
            self.opponentStatus[2] = False
        else:
            self.teamName = "red"
            if self.index == 0:
                self.basePoint = [(x, y + 1), (float(x), float(y + 1))]
            elif self.index == 2:
                self.basePoint = [(x, y), (float(x), float(y))]
            self.opponentStatus[1] = False
            self.opponentStatus[3] = False
Example no. 14
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    # Get all non-wall positions on the board
    self.legalPositions = gameState.data.layout.walls.asList(False)

    # Initialize position belief distributions for opponents
    self.positionBeliefs = {}
    for opponent in self.getOpponents(gameState):
      self.initializeBeliefs(opponent)
Example no. 15
  def registerInitialState(self, gameState):

    CaptureAgent.registerInitialState(self, gameState)
    self.boundary_top = True
    if gameState.getAgentState(self.index).getPosition()[0] == 1:
      self.isRed = True
    else:
      self.isRed = False

    self.boundaries = self.boundaryTravel(gameState)
    self.treeDepth = 3
Example no. 16
 def registerInitialState(self, gameState):
     global defendFoodList
     global g_intorState
     CaptureAgent.registerInitialState(self, gameState)
     self.start = gameState.getAgentPosition(self.index)
     self.oppIndces = self.getOpponents(gameState)
     self.teamIndces = self.getTeam(gameState)
     self.walls = gameState.getWalls()
     self.deadEnd = self.buildDeadEnd(gameState)
     self.pointToWin = 100
     self.wallMemory = gameState.getWalls().deepCopy()
     self.blockers = []
     defendFoodList = self.getFoodYouAreDefending(gameState).asList()
Example no. 17
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)
    
    # Initialize Particles
    for index in xrange(gameState.getNumAgents()):
      if index == self.index:
        continue

      if self.isTeammate(gameState, index):
        self.teammateLocations[index] = gameState.getInitialAgentPosition(index)
      else:
        self.enemyParticles[index] = util.Counter()
        self.enemyParticles[index][gameState.getInitialAgentPosition(index)] = self.numParticles
Example no. 18
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs
    global validPositions

    self.lastFood = None

    if len(validPositions) == 0:
      # All positions are those that are not walls
      validPositions = gameState.getWalls().asList(False)

      # We know that each enemy must be at its initial position at registration
      for enemyIndex in self.getOpponents(gameState):
        self.establishLocation(enemyIndex, gameState.getInitialAgentPosition(enemyIndex))
Example no. 19
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.start = gameState.getAgentPosition(self.index)
    self.walls = gameState.getWalls()
    top = 0
    bot = self.walls.height
    for food in self.getFood(gameState).asList():
      if food[1] < bot:
        bot = food[1]
      if food[1] > top:
        top = food[1]
    food1,food2 = [], []
    # for food in self.getFood(gameState).asList():
    #   if food[1] > bot+(top-bot)/2:
    #     food1.append(food)
    #   else:
    #     food2.append(food)
    for y in range(self.walls.height):
      for x in range(self.walls.width):
        if (x,y) in self.getFood(gameState).asList():
          if len(food2) <= len(self.getFood(gameState).asList())/2:
            food2.append((x,y))
    for food in self.getFood(gameState).asList():
      if food not in food2:
        food1.append(food)
    self.foodOne,self.foodTwo = food1,food2
Example no. 20
    def registerInitialState(self, gameState):
      """
      This method handles the initial setup of the
      agent to populate useful fields (such as what team
      we're on).

      A distanceCalculator instance caches the maze distances
      between each pair of positions, so your agents can use:
      self.distancer.getDistance(p1, p2)

      IMPORTANT: This method may run for at most 15 seconds.
      """
      CaptureAgent.registerInitialState(self, gameState)

      self.start = gameState.getAgentPosition(self.index)
      self.startPos=gameState.getAgentState(self.index).getPosition()
Example no. 21
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on). 
    
    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    ''' 
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py. 
    '''
    CaptureAgent.registerInitialState(self, gameState)

    # Store team and enemy indices
    self.teamIndices = self.getTeam(gameState)
    self.enemyIndices = self.getOpponents(gameState)

    # Check how recently we were near the enemy to check if we've knocked him out
    self.nearEnemyCounter = 0

    # Set up particle filters to track enemy locations
    self.enemyLocFilters = {}
    for i in self.enemyIndices:
      self.enemyLocFilters[i] = (ParticleFilter(gameState, i,
                              gameState.getInitialAgentPosition(i)))


    # Dict of qValues with (state, action) tuples as keys
    self.qValues = util.Counter()

    self.modes = ['Offense', 'Defense']
    self.currMode = 'Offense'
    self.discount = 0.5
    self.learningRate = 0.5

    self.initializeWeights()

    # Debug counter of how many agents have registered; assumes a
    # module-level `test = 0` defined elsewhere.
    global test
    test += 1
    print(test)
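
Examples no. 21 and 25 construct a ParticleFilter per enemy, seeded at the enemy's initial position; the class itself is not shown here. A bare-bones, self-contained sketch of the same idea (everything below, including the observation-probability callback, is illustrative rather than the original filter):

import random

class SimpleParticleFilter(object):
    def __init__(self, numParticles, initialPos):
        # Every particle starts at the enemy's known starting cell.
        self.particles = [initialPos] * numParticles

    def elapseTime(self, neighbors):
        # Move each particle to a uniformly chosen neighbouring cell.
        self.particles = [random.choice(neighbors[p]) for p in self.particles]

    def observe(self, obsProb):
        # obsProb(pos) -> likelihood of the current noisy reading given pos.
        weighted = [(obsProb(p), p) for p in self.particles]
        total = sum(w for w, _ in weighted)
        if total == 0:
            return  # reading inconsistent with every particle; keep them as-is
        # Resample with replacement, proportionally to the weights.
        newParticles = []
        for _ in self.particles:
            r = random.uniform(0, total)
            acc = 0.0
            for w, p in weighted:
                acc += w
                if acc >= r:
                    newParticles.append(p)
                    break
            else:
                newParticles.append(weighted[-1][1])
        self.particles = newParticles

    def beliefDistribution(self):
        belief = {}
        for p in self.particles:
            belief[p] = belief.get(p, 0.0) + 1.0 / len(self.particles)
        return belief
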
Example no. 22
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    """
    Initialize all variables
    """
    height = gameState.data.layout.height
    width = gameState.data.layout.width
    walls = gameState.data.layout.walls
    curr_state = gameState.getAgentState(self.index)
    curr_pos = gameState.getAgentState(self.index).getPosition()
    us = self.getTeam(gameState)
    them = self.getOpponents(gameState)
    our_food = self.getFoodYouAreDefending(gameState)
    their_food = self.getFood(gameState)
    score = self.getScore(gameState)
    capsules = self.getCapsules(gameState)


    food_grid = self.getFoodYouAreDefending(gameState)
    halfway = food_grid.width / 2
    if self.red:
      x_range = range(halfway)
    else:
      x_range = range(halfway, food_grid.width)
    for y in range(food_grid.height):
      for x in x_range:
        food_grid[x][y] = True
    self.our_side = food_grid


    """
    HMM for reading where opponent is
    """
    self.hmm_list = dict([(index, util.Counter()) for 
      index in self.getOpponents(gameState)])
    if self.red:
      self.agentsOnTeam = gameState.getRedTeamIndices()
    else:
      self.agentsOnTeam = gameState.getBlueTeamIndices()
    self.legalPositions = [p for p in gameState.getWalls().asList(False)]
    for dist in self.hmm_list.values():
      # initializes randomly over all positions
      for p in self.legalPositions:
        dist[p] = 1
      dist.normalize()
Example no. 23
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)
    self.validPositions = gameState.getWalls().asList(False)
    self.enemyIndices = self.getOpponents(gameState)
    self.numParticles = 25
    self.starts = {}
    self.steps = [(0, 0), (0, 1), (1, 0), (0, -1), (-1, 0)]
    self.lastFood = None

    for index in gameState.getRedTeamIndices() + gameState.getBlueTeamIndices():
      self.starts[index] = gameState.getInitialAgentPosition(index)

    self.enemyParticles = {}

    for index in self.enemyIndices:
      self.enemyParticles[index] = util.Counter()
      self.enemyParticles[index][self.starts[index]] = self.numParticles
Example no. 24
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self,gameState)
    self.distancer.getMazeDistances()
    if self.isRed:
      self.friends = gameState.getRedTeamIndices()
      self.enemies = gameState.getBlueTeamIndices()
    else:
      self.friends = gameState.getBlueTeamIndices()
      self.enemies = gameState.getRedTeamIndices()

    self.inferenceModule.initialize(gameState, self.isRed, self.enemies, self.index)#infModule checks to make sure we don't do this twice
    self.cellLayout = cellLayout.CellLayout(gameState.data.layout, self.distancer) # pass one of these guys around
    thisLayoutInfo = boardExtractor.vectorize(gameState.data.layout,self.cellLayout)
    boardValues = knownWeights.ClusteringValues
    boardValues['thisone'] = thisLayoutInfo
   
    clusters, clusterElements = kMeansClassifier.kMeans(boardValues, 4)

    ourCluster = None
    for cluster in clusterElements:
      if 'thisone' in clusterElements[cluster]:
        ourCluster = cluster
    
    cellmates = list(clusterElements[ourCluster])
    cellmates.remove('thisone')

    playingWeights = util.Counter()
    for mate in cellmates:
      for key in knownWeights.WeightsMap[mate]:
        playingWeights[key] += knownWeights.WeightsMap[mate][key]
    
    playingWeights.divideAll(float(len(cellmates)))
 
    #print playingWeights
      

#    self.setWeights(playingWeights)
    self.setWeights(weightsConfig.WeightsMap)
    
    self.holdTheLineModule = holdTheLineModule.holdTheLineModule( self.friends, self.enemies, self.isRed,self.index, self.heuristicWeights, self.inferenceModule, self.cellLayout, self.distancer)
    self.defenseModule = defenseModule.defenseModule( self.friends, self.enemies, self.isRed,self.index, self.heuristicWeights, self.inferenceModule, self.cellLayout, self.distancer)
    self.attackModule = attackModule.AttackModule( self.friends, self.enemies, self.isRed,self.index, self.heuristicWeights, self.inferenceModule, self.cellLayout, self.distancer)
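
Example no. 24 classifies the current layout with kMeansClassifier.kMeans(boardValues, 4) and then averages the weights of the layouts that fall into the same cluster. That classifier is not included here; a bare-bones k-means over numeric feature vectors looks roughly like this (illustrative only, and not necessarily matching the original return signature):

import random

def kMeans(vectors, k, iterations=20):
    # vectors: dict name -> feature list (all lists the same length)
    names = list(vectors)
    centroids = [list(vectors[name]) for name in random.sample(names, k)]
    assignment = {}
    for _ in range(iterations):
        # Assignment step: nearest centroid by squared Euclidean distance.
        for name in names:
            v = vectors[name]
            dists = [sum((a - b) ** 2 for a, b in zip(v, c)) for c in centroids]
            assignment[name] = dists.index(min(dists))
        # Update step: each centroid becomes the mean of its members.
        for i in range(k):
            members = [vectors[n] for n in names if assignment[n] == i]
            if members:
                centroids[i] = [sum(col) / float(len(col)) for col in zip(*members)]
    clusters = dict((i, [n for n in names if assignment[n] == i]) for i in range(k))
    return centroids, clusters
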
Example no. 25
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on). 
    
    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    ''' 
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py. 
    '''
    CaptureAgent.registerInitialState(self, gameState)
    ''' 
    Your initialization code goes here, if you need any.
    '''
    # Store team and enemy indices
    self.teamIndices = self.getTeam(gameState)
    self.enemyIndices = self.getOpponents(gameState)

    # Decide which Pac-man takes the bottom half
    self.isBottom = (self.index == min(self.teamIndices))

    # Divvy up food between the two Pac-men
    self.foodLists = self.distributeFood(False, False, gameState)
    self.foodList = self.foodLists['Bottom'] if self.isBottom else self.foodLists['Top']

    # Check how recently we were near the enemy to check if we've knocked him out
    self.nearEnemyCounter = 0

    # Set up particle filters to track enemy locations
    self.enemyLocFilters = {}
    for i in self.enemyIndices:
      self.enemyLocFilters[i] = (ParticleFilter(gameState, i,
                              gameState.getInitialAgentPosition(i)))
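
Example no. 25 calls self.distributeFood(False, False, gameState), which is defined elsewhere in that team's code. One plausible way to divide the food, purely as an illustration (the two boolean arguments are ignored here because their meaning is not shown):

  def distributeFood(self, flagA, flagB, gameState):
    # Illustrative sketch only: split the enemy food around the median
    # y coordinate into a bottom half and a top half.
    food = sorted(self.getFood(gameState).asList(), key=lambda pos: pos[1])
    half = len(food) // 2
    return {'Bottom': food[:half], 'Top': food[half:]}
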
Example no. 26
    def registerInitialState(self, gameState):
        """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """
        '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
        CaptureAgent.registerInitialState(self, gameState)
Example no. 27
    def registerInitialState(self, gameState):
        """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

        CaptureAgent.registerInitialState(self, gameState)
        self.home = gameState.getAgentState(self.index).getPosition()
        self.originalFood = len(self.getFood(gameState).asList())
        self.defendFood = len(self.getFoodYouAreDefending(gameState).asList())
        self.walls = gameState.getWalls().asList()
        self.lastEaten = None
        self.eatenFood = None
Example no. 28
 def registerInitialState(self, gameState):
   self.visited = util.Counter()
   self.visited_food = set()
   self.moment = None
   self.features = {
       'bias': lambda *args: -1000,
       'explore': lambda pos: (100./(1+self.visited[pos]))**3,
       'score': lambda score: score**3,
       'volatility': lambda winning: winning * random.randint(-100, 100) + random.randint(0,100) * random.randint(1, 5)**(random.random()+.5),
       'hungry': self.hunger_score,
       'near_food': self.near_food,
       "curl": self.curl_score,
       "dontstop": lambda x: 500*(x if x else -10),
       "foodavg": self.foodavg,
       "defensive": lambda (my_food, my_caps): len(my_food)**3
   }
   self.corrections = {}
   self.age = 0
   self.lastsal = None
   CaptureAgent.registerInitialState(self, gameState)
Example no. 29
 def registerInitialState(self, gameState):
     CaptureAgent.registerInitialState(self, gameState)
     # Get the x-axis value of the middle wall in the game board
     self.midWidth = gameState.data.layout.width / 2
     # Get the legal positions that agents could possibly be.
     self.legalPositions = [
         p for p in gameState.getWalls().asList(False) if p[1] > 1
     ]
     # Use a maze distance calculator
     self.distancer.getMazeDistances()
     # Define which mode the agent is in
     self.offenseTactic = False
     # Get enemies' index
     self.enemyIndices = self.getOpponents(gameState)
     # Initialize each enemy's position distribution: all probability mass starts on its known initial position
     self.enemyPositionitionDistribution = {}
     for enemy in self.enemyIndices:
         self.enemyPositionitionDistribution[enemy] = util.Counter()
         self.enemyPositionitionDistribution[enemy][
             gameState.getInitialAgentPosition(enemy)] = 1.
Example no. 30
    def registerInitialState(self, gameState):
        """
		@version 1.3
		"""
        CaptureAgent.registerInitialState(self, gameState)
        '''
		Your initialization code goes here, if you need any.
		'''
        # Agent's Attribution
        self.beginningTower = gameState.getAgentPosition(self.index)
        self.teamColor = gameState.isOnRedTeam(self.index)
        self.myPosition = gameState.getAgentPosition(self.index)
        self.carryFood = 0
        self.teamFood = self.getFoodYouAreDefending(gameState).asList()
        self.isAttacker = True
        self.isDefender = True
        self.deathNumber = -1  # starts at -1 because it is incremented once at the beginning of the game

        # Logic Attribution
        self.foodList = self.getFood(gameState).asList()
        self.maxFood = len(self.foodList)
        self.width = gameState.data.layout.width
        self.height = gameState.data.layout.height
        # print "self.width:", self.width
        # print "self.height:", self.height
        # self.stepLeft = 300
        self.stepLeft = gameState.data.timeleft / 4
        # print "!!!!!",self.stepLeft
        self.considerBack = False
        self.portals = self.getPortals(gameState)
        self.teamIndices = self.getTeam(gameState)
        for index in self.teamIndices:
            if index != self.index:
                self.teamateIndex = index
        # self.oldApproximatingQValue = 0

        numPortals = len(self.portals)
        self.midPortal = self.portals[numPortals / 2]
        self.numOfOppPac = 0
        self.opponents = self.getOpponents(gameState)  # indices of opponents
        self.walls = gameState.getWalls()
Example no. 31
    def registerInitialState(self, gameState):
        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)
        self.warnings = 0

        # Enemy and teammate indices
        self.enemy_indices = self.getOpponents(gameState)
        team_indices = self.getTeam(gameState)
        team_indices.remove(self.index)
        self.teammate_index = team_indices[0]

        # Analysing layout
        self.walls = set(gameState.data.layout.walls.asList())
        self._maxx = max([item[0] for item in self.walls])
        self._maxy = max([item[1] for item in self.walls])
        self.sign = 1 if gameState.isOnRedTeam(self.index) else -1

        # Determining home boundary
        self.homeXBoundary = self.start[0] + (
            (self._maxx // 2 - 1) * self.sign)
        cells = [(self.homeXBoundary, y) for y in range(1, self._maxy)]
        self.homeBoundaryCells = [
            item for item in cells if item not in self.walls
        ]

        # Determining legal actions count for all cells
        valid_cells = self.getGrid(1, 1, self._maxx, self._maxy)
        self._legalActions = util.Counter()
        for cell in valid_cells:
            x, y = cell
            if (x - 1, y) in valid_cells:
                self._legalActions[cell] += 1
            if (x + 1, y) in valid_cells:
                self._legalActions[cell] += 1
            if (x, y - 1) in valid_cells:
                self._legalActions[cell] += 1
            if (x, y + 1) in valid_cells:
                self._legalActions[cell] += 1

        # Position history
        self._positionsHistory = []
Example no. 32
 def registerInitialState(self, gameState):
     self.start = gameState.getAgentPosition(self.index)
     CaptureAgent.registerInitialState(self, gameState)
     # Sets if agent is on red team or not
     # if self.red:
     #   CaptureAgent.registerTeam(self, gameState.getRedTeamIndices())
     # else:
     #   CaptureAgent.registerTeam(self, gameState.getBlueTeamIndices())
     """self variables"""
     self.walls = gameState.getWalls()
     self.mapWidth = gameState.getWalls().width
     self.mapHeight = gameState.getWalls().height
     self.mapSize = self.mapWidth * self.mapHeight
     #the agent's preference: its favored range of movement (y coordinate)
     self.favoredY = 0
     #index of ally (teammate)
     self.ally = self.index
     for i in self.getTeam(gameState):
         if self.index != i:
             self.ally = i
     #indexes of enemies
     self.enemies = self.getOpponents(gameState)
     #positions that are not walls on the other side (legal positions in enemy territory)
     self.enemyPos = []
     #positions that are not walls on our side (legal positions in our territory)
     self.ourPos = []
     #escape goals -- the entrance positions on our side
     self.escapeGoals = []
     #corner depth: a map of position index and depth value, deeper corners have higher depths, 0 by default
     self.cornerDepth = util.Counter()
     self.beliefs = {}
     #all walkable (non-wall) positions on the whole map
     self.legalPositions = gameState.getWalls().asList(False)
     #x coordinate of the map's middle line
     self.midWidth = gameState.data.layout.width / 2
     self.lastTurnFoodList = self.getFoodYouAreDefending(gameState).asList()
     #a list of enemy's (index, pos, isPacman, scaredTimer), fetched in each chooseAction
     self.enemyInfo = []
     """initiate variables"""
     self.init(gameState)
     self.startBelief(gameState)
Example no. 33
    def registerInitialState(self, gameState):
        """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).
    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)
    IMPORTANT: This method may run for at most 15 seconds.
    """
        '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
        CaptureAgent.registerInitialState(self, gameState)

        self.walls = gameState.getWalls()
        self.width = self.walls.width
        self.height = self.walls.height
        # print gameState
        # print self.getFood(gameState)
        # print self.getFood(gameState).asList()
        # print "width", self.width
        # print "heigth", self.height
        # exit(0)
        self.hasFood = 0
        self.lastAction = None
        self.lastState = None
        self.capsules = self.getCapsules(gameState)
        self.isSuper = False
        # print self.capsules
        # record whether the pacman is going back home
        self.isGoingHome = False
        # record whether the pacman is getting the capsule
        self.isGettingCapsule = False

        # Index of enemies and team
        self.enemyIndex = self.getOpponents(gameState)
        self.teamIndex = self.getTeam(gameState)
Example no. 34
 def registerInitialState(self, gameState):
     """
       This method handles the initial setup of the
       agent to populate useful fields (such as what team
       we're on).
       A distanceCalculator instance caches the maze distances
       between each pair of positions, so your agents can use:
       self.distancer.getDistance(p1, p2)
       IMPORTANT: This method may run for at most 15 seconds.
       """
     '''
       Make sure you do not delete the following line. If you would like to
       use Manhattan distances instead of maze distances in order to save
       on initialization time, please take a look at
       CaptureAgent.registerInitialState in captureAgents.py.
       '''
     CaptureAgent.registerInitialState(self, gameState)
     '''
       Your initialization code goes here, if you need any.
       '''
     self.foodNum = 0
     # self.pathToExit = []
     self.myTeam = ''
     self.exitCol = []
     self.walls = gameState.getWalls()
     # get what team the bot is on
     if self.getTeam(gameState)[0] % 2 == 0:
         # exit direction left
         self.myTeam = 'red'
     else:
         # exit direction right
         self.myTeam = 'blue'
     # find available exit column spaces
     if self.myTeam == 'blue':
         exitCol = (gameState.data.layout.width) // 2
     else:
         exitCol = (gameState.data.layout.width - 1) // 2
     for i in range(1, gameState.data.layout.height - 1):
         # self.debugDraw([((gameState.data.layout.width - 1) // 2, i)], [0, 1, 0])
         if not self.walls[exitCol][i]:
             self.exitCol.append((exitCol, i))
Example no. 35
    def registerInitialState(self, gameState):
        """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on). 
    
    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """
        ''' 
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py. 
    '''
        CaptureAgent.registerInitialState(self, gameState)
        ''' 
    Your initialization code goes here, if you need any.
    '''
        global agentPositions, agentStrategies
        self.startingPosition = gameState.getAgentState(
            self.index).getPosition()
        agentPositions[self.index] = self.startingPosition
        agentStrategies[self.index] = 'offense'

        global allFood, topFood, bottomFood, topChosen
        allFood = self.getFood(gameState).asList()
        for x, y in allFood:
            if y > (gameState.data.layout.height / 2):
                topFood.append((x, y))
            else:
                bottomFood.append((x, y))

        if topChosen == False:
            self.onTop = True
            topChosen = True
        else:
            self.onTop = False
Example no. 36
    def registerInitialState(self, gameState):

        CaptureAgent.registerInitialState(self, gameState)
        self.start = gameState.getAgentPosition(self.index)
        self.walls = gameState.getWalls().asList()
        self.beenEatenFood = []

        self.middleLines = []
        self.goal = None
        self.midX = int((gameState.data.layout.width - 2) / 2)
        self.oppoMidX = int((gameState.data.layout.width - 2) / 2)
        self.opponentsIndex = self.getOpponents(gameState)
        if not self.red:
            self.midX += 1
        else:
            self.oppoMidX += 1

        self.attackDir = Directions.WEST
        if self.red:
            self.attackDir = Directions.EAST
Example no. 37
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]

    self.positions = {}

    "This team's positions"
    for i in self.getTeam(gameState):
      self.positions[i] = [True, None, gameState.getAgentPosition(i)]

    "Opponents start with a uniform belief over all legal positions"
    for i in self.getOpponents(gameState):
      self.positions[i] = [False, util.Counter(), None]

    for position in self.legalPositions:
      for certain, counter, actualPos in self.positions.values():
        if not certain:
          counter[position] = 1.0

    for entry in self.positions.values():
      certain, counter, actualPos = entry
      if not certain:
        counter.normalize()
        entry[2] = counter.argMax()
Example no. 38
    def registerInitialState(self, gameState):

        CaptureAgent.registerInitialState(self, gameState)

        # Initialization: calculate the home points,
        # i.e. the points along the middle line of the layout.
        # Red is on the left, Blue on the right, so blue needs + 1.
        # A pacman returns to a home point to score and turn back into a ghost.
        self.homePoints = []
        if self.red:
            central = (gameState.data.layout.width - 2) / 2
        else:
            central = ((gameState.data.layout.width - 2) / 2) + 1

        for height in range(1, gameState.data.layout.height - 1):
            if not gameState.hasWall(central, height):
                self.homePoints.append((central, height))

        self.entryPoints = self.homePoints
        self.survivalMode = False
        self.totalFood = len(self.getFood(gameState).asList())
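
The same boundary-column computation shows up, with small red/blue offsets, in Examples 5, 36, 46, 47 and 50. Once self.homePoints is populated, a typical use is to retreat to the nearest crossing point when carrying food; a short illustrative helper (not part of the original agent):

    def nearestHomePoint(self, gameState):
        # Illustrative helper: the closest boundary cell by maze distance.
        myPos = gameState.getAgentPosition(self.index)
        return min(self.homePoints, key=lambda p: self.getMazeDistance(myPos, p))
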
Example no. 39
    def registerInitialState(self, gameState):
        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)
        self.food = None
        self.totalFood = len(self.getFood(gameState).asList())
        self.startState = gameState

        foodList = self.getFood(gameState).asList()
        self.chooseFood(foodList)
        self.distancer = distanceCalculator.Distancer(gameState.data.layout)
        self.distancer.getMazeDistances()
        self.escape = 1
        self.defense= 2
        self.offense = 3
        self.status = self.offense
        self.goodFood = self.chooseGoodFood(foodList)
        self.save = False
        self.bound = gameState.data.layout.width / 2
        if gameState.isOnRedTeam(self.index):
            self.bound -= 1
        self.foodDefend = self.getFoodYouAreDefending(gameState).asList()
Example no. 40
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)
    '''
    Your initialization code goes here, if you need any.
    '''
    self.createPacmanDomain()
    self.start = gameState.getAgentPosition(self.index)
    self.masterFoods = self.getFood(gameState).asList()
    self.cornerFoods = self.isSurrounded(gameState)
    self.masterCapsules = self.getCapsules(gameState)
    self.homePos = self.getBoundaryPos(gameState, 1)
    self.pddlFluentGrid = self.generatePDDLFluentStatic(gameState)
    self.pddlObject = self.generatePddlObject(gameState)
    self.foodEaten = 0
    self.currScore = self.getScore(gameState)
    self.history = Queue()

    self.stuck = False
    self.capsuleTimer = 0
    self.superPacman = False
    self.foodCarrying = 0
Example no. 41
    def registerInitialState(self, gameState):
        "Initializes beliefs and inference modules"
        CaptureAgent.registerInitialState(self, gameState)

        self.game_state = gameState

        self.opponent_agent_indices = utility.get_opponents_agent_indices(self.game_state, self.index)
        ghostAgents = [RandomGhost(index) for index in self.opponent_agent_indices]
        # print("ghostAgents", list(map(lambda x: x.index, ghostAgents)))
        # print("self.index: ", self.index)
        self.inferenceModules = [self.inferenceType(a, self.index) for a in ghostAgents]

        import __main__
        self.display = __main__._display
        for inference in self.inferenceModules:
            inference.initialize(gameState)
        # for ghost, inference_modules in zip(ghostAgents, self.inferenceModules):
        #     inference_modules.addGhostAgent(ghost)

        self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
        self.firstMove = True
Example no. 42
  def registerInitialState(self, gameState):
    self.start = gameState.getAgentPosition(self.index)
    CaptureAgent.registerInitialState(self, gameState)
    #weights & features for offensiveAgent
    self.weights = {'carrying':0.0, 'successorScore': 0.0, 'getFood': 0.0
                    , 'getCaplual': 0.0, 'enemyOneStepToPacman': 0.0, 'towardToGhost': 0.0,'distanceToFood': 0.0,
                    'back': 0.0, 'stop': 0.0, 'eatGhost':0.0, 'reverse': -2.0}
    #self.weights = {'successorScore': 0.0, 'distanceToFood': 0.0}
    
    self.epsilon = 0.05
    self.alpha = 0.5
    self.discountFactor = 0.5

    self.reward = 0
		
    try:
      with open('./teams/Alpha-baby/weights.txt', "r") as file:
        #print"done reading weights"
        self.weights = eval(file.read())
    except IOError:
          return
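
Example no. 42 keeps a weight dictionary and an exploration rate epsilon = 0.05. The usual epsilon-greedy action choice those parameters feed into is sketched below; self.getQValue is assumed to exist elsewhere in the agent, and util.flipCoin / random are the standard helpers used throughout the framework:

  def chooseActionEpsilonGreedy(self, gameState):
    # Illustrative sketch of epsilon-greedy action selection.
    actions = gameState.getLegalActions(self.index)
    if util.flipCoin(self.epsilon):
      return random.choice(actions)
    return max(actions, key=lambda a: self.getQValue(gameState, a))
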
Example no. 43
  def registerInitialState(self, gameState):
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs
    global validPositions

    self.initializeLasts()
    self.enemyFoodCount = {}

    for enemyIndex in self.getOpponents(gameState):
      self.enemyFoodCount[enemyIndex] = 0

    self.variousDistributions = []

    if len(validPositions) == 0:
      # All positions are those that are not walls
      validPositions = gameState.getWalls().asList(False)

      # We know that each enemy must be at its initial position at registration
      for enemyIndex in self.getOpponents(gameState):
        self.establishLocation(enemyIndex, gameState.getInitialAgentPosition(enemyIndex))
Example no. 44
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)
        self.distancer.getMazeDistances()

        #########################################################################################################

        self.numSims = NUM_SIM
        self.sturns = SIM_LEVEL
        self.levels = LEVEL
        self.svalue = 0
        self.smoves = []
        self.gameState = gameState
        self.current_node = Node(
            MState(self.gameState, self.index, self.svalue, self.smoves,
                   self.sturns))
        #print '~~~register self.current_node', self.current_node
        self.startPosition = self.current_node.mstate.gameState.getAgentState(
            self.index).getPosition()
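
Example no. 44 builds a search tree out of Node and MState objects driven by the NUM_SIM / SIM_LEVEL / LEVEL constants, none of which are shown here. For reference, child selection in Monte Carlo tree search is commonly done with the UCB1 score; the generic sketch below is illustrative and not necessarily what this Node class implements:

import math

def ucb1(totalValue, visits, parentVisits, c=1.4):
    # Standard UCB1: average value plus an exploration bonus for
    # children that have been visited rarely.
    if visits == 0:
        return float('inf')
    return float(totalValue) / visits + c * math.sqrt(math.log(parentVisits) / visits)

def selectChild(children):
    # children: objects with .value (sum of rollout returns) and .visits.
    parentVisits = sum(child.visits for child in children)
    return max(children, key=lambda child: ucb1(child.value, child.visits, parentVisits))
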
Example no. 45
    def registerInitialState(self, gameState):
        """
        This method handles the initial setup of the
        agent to populate useful fields (such as what team
        we're on).

        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)

        IMPORTANT: This method may run for at most 15 seconds.
        """
        '''
        Make sure you do not delete the following line. If you would like to
        use Manhattan distances instead of maze distances in order to save
        on initialization time, please take a look at
        CaptureAgent.registerInitialState in captureAgents.py.
        '''
        CaptureAgent.registerInitialState(self, gameState)
        self.gameNumber += 1

        # Your initialization code goes here, if you need any.
        self.action_list = [Directions.STOP]
        self.initial_food = self.getFood(gameState).count()
        self.initial_defending_food = self.getFoodYouAreDefending(
            gameState).count()

        self.exploration_rate = self.training_exploration_rate if self.isTraining(
        ) else self.testing_exploration_rate

        if winning_weights is not None:
            weights1, bias1, weights2, bias2, weights3, bias3 = winning_weights
            self.sess.run([
                self.weights1.assign(weights1),
                self.bias1.assign(bias1),
                self.weights2.assign(weights2),
                self.bias2.assign(bias2),
                self.weights3.assign(weights3),
                self.bias3.assign(bias3)
            ])
Example no. 46
  def registerInitialState(self, gameState):
	CaptureAgent.registerInitialState(self, gameState)

	self.lastAction = None
	self.offensive = True
	self.distancer.getMazeDistances()
	self.start = gameState.getInitialAgentPosition(self.index)
	self.midWidth = gameState.data.layout.width/2
	self.midHeight = gameState.data.layout.height/2
	self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]
	self.team = self.getTeam(gameState)
	self.opponents = self.getOpponents(gameState)
	self.distancer.getMazeDistances()

	self.attacker = None
	for t in self.team:
	  if self.index <= t:
		break
	else:
	  self.attacker = True
	if self.attacker == None:
	  self.attacker = False

	# Compute central positions without walls from map layout.
	# The defender will walk among these positions to defend
	# its territory.
	if self.red:
	  centralX = (gameState.data.layout.width - 2)/2
	else:
	  centralX = ((gameState.data.layout.width - 2)/2) + 1
	self.noWallSpots = []
	for i in range(1, gameState.data.layout.height - 1):
	  if not gameState.hasWall(centralX, i):
		self.noWallSpots.append((centralX, i))
	# Remove some positions. The agent does not need to patrol
	# all positions in the central area.
	while len(self.noWallSpots) > (gameState.data.layout.height -2)/2:
	  self.noWallSpots.pop(0)
	  self.noWallSpots.pop(len(self.noWallSpots)-1)
	# Update probabilities to each patrol point.
	self.distFoodToPatrol(gameState)
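
distFoodToPatrol is defined elsewhere in this team's code; judging from how it is used here and in Example 47, it weights each patrol spot by how close it is to the food being defended. A hedged sketch of that idea (the attribute name patrolWeights is illustrative, not the original):

  def distFoodToPatrol(self, gameState):
    # Illustrative sketch: weight each patrol spot by its closeness to the
    # nearest food pellet we are defending, then normalize into probabilities.
    defendingFood = self.getFoodYouAreDefending(gameState).asList()
    self.patrolWeights = util.Counter()
    for spot in self.noWallSpots:
      if defendingFood:
        closest = min(self.getMazeDistance(spot, food) for food in defendingFood)
        self.patrolWeights[spot] = 1.0 / (closest + 1)
      else:
        self.patrolWeights[spot] = 1.0
    self.patrolWeights.normalize()
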
Example no. 47
    def registerInitialState(self, gameState):
        '''
        Do some precomputation during the first 15 seconds so that the agents
        can decide on their actions quickly during play.
        '''
        CaptureAgent.registerInitialState(self, gameState)
        self.lastAction = None
        self.offensive = True
        # self.distancer.getMazeDistances()
        # self.start = gameState.getInitialAgentPosition(self.index)
        if self.red:
            self.midWidth = (gameState.data.layout.width - 2) / 2
        else:
            self.midWidth = ((gameState.data.layout.width - 2) / 2) + 1
        self.midHeight = gameState.data.layout.height / 2
        self.legalPositions = [
            p for p in gameState.getWalls().asList(False) if p[1] > 1
        ]
        self.opponents = self.getOpponents(gameState)
        self.distancer.getMazeDistances()
        self.team = self.index
        for sameTeam in self.getTeam(gameState):
            if sameTeam != self.index:
                self.team = sameTeam
        # Compute central fields without walls from map layout.
        # The defender will walk among these positions when no
        # opponents ghosts are observable to defend its territory.
        self.noWallSpots = []
        for i in range(1, gameState.data.layout.height - 1):
            if not gameState.hasWall(self.midWidth, i):
                self.noWallSpots.append((self.midWidth, i))
        # Remove some positions.
        while len(self.noWallSpots) > (gameState.data.layout.height - 2) / 2:
            self.noWallSpots.pop(0)
            self.noWallSpots.pop(len(self.noWallSpots) - 1)
        # Initialize the probabilities to each patrol point.
        self.distFoodToPatrol(gameState)
        self.deadEnd = list()
        self.DFSExplore(gameState, [])
        self.timeReverse = 0
Example no. 48
  def registerInitialState(self, gameState):
    """
    Initialize the beliefs of the enemies uniformly
    and many related operations
    """
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs

    # legalPositions is a useful variable. it's not provided! let's construct it
    width = gameState.data.layout.width
    height = gameState.data.layout.height
    self.legalPositions = [(x, y) for x in range(width) for y in range(height) if not gameState.hasWall(x, y)]

    # set legalPositions for defending and offending respectively
    self.defensiveLegalPositions = [p for p in self.legalPositions if self.isOurTerrain(gameState, p)]
    self.offensiveLegalPositions = [p for p in self.legalPositions if not (p in self.defensiveLegalPositions)]

    # we need a new distancer for calculating distance within defensive area
    tmpGameState = gameState.deepCopy()
    defensiveLayout = tmpGameState.data.layout
    for pos in self.offensiveLegalPositions:
      defensiveLayout.walls[pos[0]][pos[1]] = True
    self.defensiveDistancer = distanceCalculator.Distancer(defensiveLayout)
    self.defensiveDistancer.getMazeDistances()

    # initialize beliefs according to legalPositions
    for enemyId in self.getOpponents(gameState):
      self.initializeBeliefs(gameState, enemyId, initial = True)

    # set availableActions for each grid
    self.setNeighbors(gameState)

    # set boundaries - entries of the enemy!
    self.setBoundary(gameState)

    # set transition model
    self.transition = util.Counter()
    self.transition['favor'] = 0.8
    self.transition['indifferent'] = 0.05
Example no. 49
  def registerInitialState(self, gameState):
    """
    Initialize the beliefs of the enemies uniformly
    and many related operations
    """
    CaptureAgent.registerInitialState(self, gameState)

    global beliefs

    # legalPositions is a useful variable. it's not provided! let's construct it
    width = gameState.data.layout.width
    height = gameState.data.layout.height
    self.legalPositions = [(x, y) for x in range(width) for y in range(height) if not gameState.hasWall(x, y)]

    # set legalPositions for defending and offending respectively
    self.defensiveLegalPositions = [p for p in self.legalPositions if self.isOurTerrain(gameState, p)]
    self.offensiveLegalPositions = [p for p in self.legalPositions if not (p in self.defensiveLegalPositions)]

    # we need a new distancer for calculating distance within defensive area
    tmpGameState = gameState.deepCopy()
    defensiveLayout = tmpGameState.data.layout
    for pos in self.offensiveLegalPositions:
      defensiveLayout.walls[pos[0]][pos[1]] = True
    self.defensiveDistancer = distanceCalculator.Distancer(defensiveLayout)
    self.defensiveDistancer.getMazeDistances()

    # initialize beliefs according to legalPositions
    for enemyId in self.getOpponents(gameState):
      self.initializeBeliefs(gameState, enemyId, initial = True)

    # set availableActions for each grid
    self.setNeighbors(gameState)

    # set boundaries - entries of the enemy!
    self.setBoundary(gameState)

    # set transition model
    self.transition = util.Counter()
    self.transition['favor'] = 0.8
    self.transition['indifferent'] = 0.05
Example no. 50
    def registerInitialState(self, gameState):
        """
        This method handles the initial setup of the
        agent to populate useful fields (such as what team
        we're on).
        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)
        IMPORTANT: This method may run for at most 15 seconds.
        """

        CaptureAgent.registerInitialState(self, gameState)
        self.home = gameState.getAgentState(self.index).getPosition()
        self.defendFood = len(self.getFoodYouAreDefending(gameState).asList())
        self.lastEaten = None
        self.eatenFood = None
        self.walls = gameState.getWalls()
        self.getmycap = self.getCapsulesYouAreDefending(gameState)
        self.initialPosition = gameState.getInitialAgentPosition(self.index)
        self.ghostPos = None
        self.backhome = False
        self.checkScare = False
        self.middle = []
        self.foodList = len(self.getFood(gameState).asList())
        # Initialization: compute the home points, the cells along the middle line
        # of the layout. Red is on the left and Blue on the right, so Blue needs + 1.
        # A Pacman goes to a home point to bank its score and turn back into a ghost.
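        # ghostPos is set below to the deepest corner of the opponent's half of the
        # board (presumably a fallback target until a real ghost position is observed).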

        if self.red:
            central = (gameState.data.layout.width - 2) / 2 - 1
            self.ghostPos = (gameState.data.layout.width - 2,
                             gameState.data.layout.height - 2)
        else:
            central = ((gameState.data.layout.width - 2) / 2)
            self.ghostPos = (1, 1)

        for height in range(0, gameState.data.layout.height):
            if not (central, height) in gameState.getWalls().asList():
                self.middle.append((central, height))
Esempio n. 51
0
    def registerInitialState(self, gameState):
        """
        Handles the initial setup of the agent to populate useful fields 
        (such as what team we're on).

        A distanceCalculator instance caches the maze distances
        between each pair of positions, so your agents can use:
        self.distancer.getDistance(p1, p2)

        IMPORTANT: This method may run for at most 15 seconds.
        """
        CaptureAgent.registerInitialState(self, gameState)
        self.start = gameState.getAgentPosition(self.index)

        # Either read from the weights file if it exists, or initialize the weights to a util.Counter() with all features set to 0.0.
        # At the end of the game the weights are written to the file, creating it if it didn't already exist.
        self.weights = self.initializeWeights()
        pprint(self.weights)

        # LEARNING PARAMETERS
        self.discount = 0.1
        # learning rate; make large (> 0.1) for training and 0 for exploitation
        # self.alpha = 0.1 if TRAINING else 0.0
        self.alpha = 0.1
        # probability that the agent will choose randomly instead of optimally; only relevant during training
        self.epsilon = 0.1
        self.livingReward = -0.1

        # Updates some of the data for the agent based on previous game results
        if TRAINING and path.exists(WEIGHTS_FILE):
            self.initializeDataFromPreviousGame()

        # MAINTAINING GAME INFORMATION
        self.lastAction = Directions.STOP
        # The value of this should be 76; any two positions in this maze can be at most 76 moves apart
        self.maxMazeDistance = max(self.distancer._distances.values())
        self.justDied = 0
        # KEEPING TRACK OF FOOD
        self.collectedFood = 0
        self.totalFood = len(self.getFood(gameState).asList())
Esempio n. 52
0
    def registerInitialState(self, gameState):

        CaptureAgent.registerInitialState(self, gameState)
        self.distancer.getMazeDistances()
        self.target = None
        self.previousFood = None
        # Compute central positions without walls from map layout.
        # The defender will walk among these positions to defend
        # its territory.

        self.defenderList = {}
        if self.red:
            middle = gameState.data.layout.width // 2 - 1
        else:
            middle = gameState.data.layout.width  // 2
        self.boundary = []
        for i in range(1, gameState.data.layout.height - 1):
            if not gameState.hasWall(middle, i):
                self.boundary.append((middle, i))

        # Initialize probabilities
        self.DefendingProbability(gameState)
Esempio n. 53
0
    def registerInitialState(self, gameState):

        self.weight = {}
        self.weight['Defensive'] = {}
        """
    Backup weights for testing purposes
    self.weight['Offensive'] = {'distanceToFood': 1.0, 'distanceToCapsule':1.1,'DistanceToGhost':2.0,
                                'returnHome': 2.0}
    distanceToFood:21.1358828621;distanceToCapsule:72.076020285;distanceToGhost:-7.24735154835;returnHome:3.95426736099
    """

        # Set learning rate...etc
        self.epsilon = 0.1  #exploration prob
        self.alpha = 0.3  #learning rate
        self.discountRate = 0.95

        # Agent start location
        self.start = gameState.getAgentPosition(self.index)

        self.minGhost = 0.0

        CaptureAgent.registerInitialState(self, gameState)
Esempio n. 54
0
    def registerInitialState(self, gameState):
        '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''

        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)

        # Establishes the boundary of my territory
        if self.red:
            self.edge = (gameState.data.layout.width - 2) // 2
        else:
            self.edge = ((gameState.data.layout.width - 2) // 2) + 1

        self.boundary = []
        for i in range(1, gameState.data.layout.height - 1):
            if not gameState.hasWall(self.edge, i):
                self.boundary.append((self.edge, i))
        '''
Esempio n. 55
0
  def registerInitialState(self, gameState):
	CaptureAgent.registerInitialState(self, gameState)
	self.distancer.getMazeDistances()

	# Compute central positions without walls from map layout.
	# The defender will walk among these positions to defend
	# its territory.
	if self.red:
	  centralX = (gameState.data.layout.width - 2)/2
	else:
	  centralX = ((gameState.data.layout.width - 2)/2) + 1
	self.noWallSpots = []
	for i in range(1, gameState.data.layout.height - 1):
	  if not gameState.hasWall(centralX, i):
		self.noWallSpots.append((centralX, i))
	# Remove some positions. The agent does not need to patrol
	# all positions in the central area.
	while len(self.noWallSpots) > (gameState.data.layout.height -2)/2:
	  self.noWallSpots.pop(0)
	  self.noWallSpots.pop(len(self.noWallSpots)-1)
	# Update the probabilities for each patrol point.
	self.distFoodToPatrol(gameState)
Esempio n. 56
0
    def registerInitialState(self, gameState):
        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)

        # Collect the half line (middle column) positions into a list.
        self.halfline = []
        if self.red:
            # Coordinates start at 0, so for the red team the half line is half of the total width minus 3.
            halfline_x = gameState.data.layout.width // 2 - 3
        else:
            halfline_x = gameState.data.layout.width // 2 + 1
        # Find only the cells that are not walls and add them to the half line list.
        for i in range(gameState.data.layout.height):
            if not gameState.data.layout.walls[halfline_x][i]:
                self.halfline.append((halfline_x, i))
        # Store into the variables x and y the following element of the halfline list.
        #print(self.halfline)
        x, y = self.halfline[3 * len(self.halfline) // 4]
        self.place = (x, y)
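        # The layout border is all walls, so the last wall in the list is the top-right
        # corner; self.x and self.y therefore hold the maximum board coordinates.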
        self.x, self.y = gameState.getWalls().asList()[-1]
Esempio n. 57
0
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)

        # Find the initial position, the midpoint of the board, the positions
        # where the agents can move, the maze distances and the positions of all 4 agents.
        self.start = gameState.getInitialAgentPosition(self.index)
        self.widthCenter = gameState.data.layout.width / 2
        #self.legalPos = [p for p in gameState.getWalls().asList(False) if p[1] > 1]
        self.legalPos = []
        for pos in gameState.getWalls().asList(False):
            if pos[1] > 1:
                self.legalPos.append(pos)
        self.distancer.getMazeDistances()
        self.team = self.getTeam(gameState)
        self.enemies = self.getOpponents(gameState)

        # Initialize the likely position to 1 at the initial position for each of the
        # opposing agents. Create a dictionary that holds the likely position of each agent.
        self.Likelypos = {}
        for e in self.enemies:
            self.Likelypos[e] = util.Counter()
            self.Likelypos[e][gameState.getInitialAgentPosition(e)] = 1.
Esempio n. 58
0
 def registerInitialState(self, gameState):
     self.start = gameState.getAgentPosition(self.index)
     CaptureAgent.registerInitialState(self, gameState)
     self.walls =  set(gameState.data.layout.walls.asList())
     self._maxx = max([item[0] for item in self.walls])
     self._maxy = max([item[1] for item in self.walls])
     self.sign = 1 if gameState.isOnRedTeam(self.index) else -1
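     # x-coordinate of the last column on our side: offset the start column towards
     # the middle of the board by roughly half its width (sign is +1 for red, -1 for blue).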
     self.homeXBoundary = self.start[0] + ((self._maxx // 2 - 1) * self.sign)
     cells = [(self.homeXBoundary, y) for y in range(1, self._maxy)]
     self.homeBoundaryCells = [item for item in cells if item not in self.walls]
     available_states = self._getGrid(1, 1, self._maxx, self._maxy)
     self.available_actions = util.Counter()
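     # For each cell returned by _getGrid (presumably the open, non-wall cells), count
     # how many of its four neighbours are also open, i.e. the moves available there.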
     for cell in available_states:
         x, y = cell
         if (x - 1, y) in available_states:
             self.available_actions[cell] += 1
         if (x + 1, y) in available_states:
             self.available_actions[cell] += 1
         if (x, y - 1) in available_states:
             self.available_actions[cell] += 1
         if (x, y + 1) in available_states:
             self.available_actions[cell] += 1
Esempio n. 59
0
    def registerInitialState(self, gameState):
        print(gameState)
        print("Don't spy on me")

        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)

        self.setup()
        self.cost = 1
        self.food = self.getFood(gameState).asList()
        self.totalFoodNumber = len(self.food)
        self.foodLeft = self.totalFoodNumber
        self.targetFood = self.getFood(gameState).asList()[0]
        self.carry = gameState.getAgentState(self.index).numCarrying
        self.setUpTarget(gameState)
        self.defence = False
        self.myPosition = gameState.getAgentPosition(self.index)
        self.myDistance = self.computeDistances(gameState)
        self.defenceTarget = self.start
        global targetFood
        targetFood = self.start
        print("initial target", self.targetFood)
Esempio n. 60
0
  def registerInitialState(self, gameState):
    """
    This method handles the initial setup of the
    agent to populate useful fields (such as what team
    we're on).

    A distanceCalculator instance caches the maze distances
    between each pair of positions, so your agents can use:
    self.distancer.getDistance(p1, p2)

    IMPORTANT: This method may run for at most 15 seconds.
    """

    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save
    on initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    CaptureAgent.registerInitialState(self, gameState)

    '''
    Your initialization code goes here, if you need any.
    '''
    self.walls = gameState.getWalls()
    right = self.walls.width
    horizontalMid = (0 + right) / 2
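    # Pick a cell on the middle column and store it in self.start: the lower-indexed
    # agent of each team takes the first open cell from the bottom, the other agent
    # the first open cell from the top.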
    if self.index <= 1:
      for y in range(self.walls.height - 1):
        if not self.walls[horizontalMid][y]:
          self.start = (horizontalMid, y)
          break
    else:
      for y in range(self.walls.height - 1, 0, -1):
        if not self.walls[horizontalMid][y]:
          self.start = (horizontalMid, y)
          break
    print self.start